/* hardware assisted paging bits */
extern int opt_hap_enabled;
-extern int hap_capable_system;
static void svm_npt_detect(void)
{
u32 eax, ebx, ecx, edx;
- /* check CPUID for nested paging support */
+ /* Check CPUID for nested paging support. */
cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
- if ( edx & 0x01 ) /* nested paging */
- {
- hap_capable_system = 1;
- }
- else if ( opt_hap_enabled )
+
+ if ( !(edx & 1) && opt_hap_enabled )
{
- printk(" nested paging is not supported by this CPU.\n");
- hap_capable_system = 0; /* no nested paging, we disable flag. */
+ printk("SVM: Nested paging is not supported by this CPU.\n");
+ opt_hap_enabled = 0;
}
}
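
CPUID leaf 0x8000000A is AMD's SVM feature leaf; EDX bit 0 advertises
nested paging (NPT). With hap_capable_system gone, the CPU's answer is
folded directly into the user-controlled opt_hap_enabled flag. As a
sketch (not part of the patch), a standalone predicate reusing the
cpuid() helper above might look like:

    static int cpu_has_svm_npt(void)
    {
        u32 eax, ebx, ecx, edx;

        /* EDX bit 0 of the SVM feature leaf flags nested paging. */
        cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
        return edx & 1;
    }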
ecx = cpuid_ecx(0x80000001);
boot_cpu_data.x86_capability[5] = ecx;
- if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
+ if ( !(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)) )
return 0;
/* check whether SVM feature is disabled in BIOS */
return 0;
}
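
The "disabled in BIOS" check named by the comment above is elided from
this hunk. Architecturally, AMD reports a firmware SVM lock through the
VM_CR MSR (0xC0010114), whose SVMDIS bit (bit 4) reads as 1 when SVM is
locked off; EFER.SVME cannot be set in that state. A hedged sketch with
illustrative constant names (the tree's own MSR definitions may differ):

    #define MSR_VM_CR     0xC0010114
    #define VM_CR_SVMDIS  (1u << 4)

    rdmsr(MSR_VM_CR, eax, edx);
    if ( eax & VM_CR_SVMDIS )
    {
        printk("AMD SVM Extension is disabled in BIOS.\n");
        return 0;
    }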
- if ( (hsa[cpu] == NULL) && ((hsa[cpu] = alloc_host_save_area()) == NULL) )
+ if ( ((hsa[cpu] = alloc_host_save_area()) == NULL) ||
+ ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
return 0;
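
The host-save-area and root-VMCB allocations are merged into one guarded
expression, and the old hsa[cpu] == NULL pre-check is dropped, presumably
because this path runs at most once per CPU. Short-circuit evaluation
means the VMCB is never allocated once the host save area allocation has
failed; expanded, the combined check is equivalent to this sketch:

    hsa[cpu] = alloc_host_save_area();
    if ( hsa[cpu] == NULL )
        return 0;

    root_vmcb[cpu] = alloc_vmcb();
    if ( root_vmcb[cpu] == NULL )
        return 0;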
rdmsr(MSR_EFER, eax, edx);
eax |= EFER_SVME;
wrmsr(MSR_EFER, eax, edx);
- printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
svm_npt_detect();
phys_hsa_lo = (u32) phys_hsa;
phys_hsa_hi = (u32) (phys_hsa >> 32);
wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
-
- if ( (root_vmcb[cpu] == NULL) &&
- ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
- return 0;
- root_vmcb_pa[cpu] = virt_to_maddr(root_vmcb[cpu]);
+ root_vmcb_pa[cpu] = virt_to_maddr(root_vmcb[cpu]);
+
if ( cpu != 0 )
return 1;
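
For context: EFER.SVME enables the SVM instruction set, and VM_HSAVE_PA
must hold a per-CPU physical address for the host state the processor
spills across VMRUN/#VMEXIT. The lo/hi split above is just the 32-bit
wrmsr interface; assuming 64-bit MSR helpers are available in this tree,
the same sequence reads as this sketch:

    uint64_t efer;

    rdmsrl(MSR_EFER, efer);
    wrmsrl(MSR_EFER, efer | EFER_SVME);    /* enable SVM instructions */
    wrmsrl(MSR_K8_VM_HSAVE_PA, phys_hsa);  /* per-CPU host save area  */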
#include <asm/hap.h>
/* Xen command-line option to enable hardware-assisted paging */
-int opt_hap_enabled = 0;
+int opt_hap_enabled;
boolean_param("hap", opt_hap_enabled);
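
Dropping the explicit "= 0" initialiser leaves opt_hap_enabled in the
BSS, where it is zero-filled anyway; boolean_param() then registers
"hap" as a Xen boot option that can override the default. An
illustrative (hypothetical) boot entry:

    kernel /boot/xen.gz hap=1 ...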
-int hap_capable_system = 0;
/* Printouts */
#define PAGING_PRINTK(_f, _a...) \
p2m_init(d);
shadow_domain_init(d);
- if ( opt_hap_enabled && hap_capable_system && is_hvm_domain(d) )
+ if ( opt_hap_enabled && is_hvm_domain(d) )
hap_domain_init(d);
}
/* vcpu paging struct initialization goes here */
void paging_vcpu_init(struct vcpu *v)
{
- if ( opt_hap_enabled && hap_capable_system && is_hvm_vcpu(v) )
+ if ( opt_hap_enabled && is_hvm_vcpu(v) )
hap_vcpu_init(v);
else
shadow_vcpu_init(v);
XEN_GUEST_HANDLE(void) u_domctl)
{
/* Here, dispatch domctl to the appropriate paging code */
- if ( opt_hap_enabled && hap_capable_system && is_hvm_domain(d) )
+ if ( opt_hap_enabled && is_hvm_domain(d) )
return hap_domctl(d, sc, u_domctl);
else
return shadow_domctl(d, sc, u_domctl);
/* Call when destroying a domain */
void paging_teardown(struct domain *d)
{
- if ( opt_hap_enabled && hap_capable_system && is_hvm_domain(d) )
+ if ( opt_hap_enabled && is_hvm_domain(d) )
hap_teardown(d);
else
shadow_teardown(d);
/* Call once all of the references to the domain have gone away */
void paging_final_teardown(struct domain *d)
{
- if ( opt_hap_enabled && hap_capable_system && is_hvm_domain(d) )
+ if ( opt_hap_enabled && is_hvm_domain(d) )
hap_final_teardown(d);
else
shadow_final_teardown(d);
* creation. */
int paging_enable(struct domain *d, u32 mode)
{
- if ( opt_hap_enabled && hap_capable_system && is_hvm_domain(d) )
+ if ( opt_hap_enabled && is_hvm_domain(d) )
return hap_enable(d, mode | PG_HAP_enable);
else
return shadow_enable(d, mode | PG_SH_enable);
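
Every paging entry point now dispatches on the same two-term predicate.
A possible further tidy-up, not attempted in this patch, would name the
condition once, e.g.:

    static inline int paging_use_hap(const struct domain *d)
    {
        return opt_hap_enabled && is_hvm_domain(d);
    }

Each domain-level test above would then read if ( paging_use_hap(d) ),
with the vcpu case using is_hvm_vcpu(v) as before.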